import os
import numpy as np
import pandas as pd
import seaborn as sns
import pymc3 as pm
import theano.tensor as tt
import scipy.stats as st
import theano
from sklearn.metrics import confusion_matrix as conf_mat
# --- Notebook environment setup (IPython magics; not plain Python) ---
# %pylab floods the namespace with numpy/matplotlib names (plt, np, float64, sum, ...);
# several later cells depend on that star-import.
%pylab inline
%config InlineBackend.figure_format = 'retina'
# %qtconsole --colors=linux
%load_ext watermark
# Global plotting style for every figure in the notebook.
plt.style.use('ggplot')
plt.rc('text', usetex=False)
plt.rc('font', family='serif')
# Record library versions for reproducibility.
%watermark -v -m -p numpy,pandas,seaborn,pymc3,matplotlib,theano
# load subject table
# NOTE: the second positional argument of read_csv is `sep`.
Subjinfo = pd.read_csv('data/participant_info.csv', ',')
Subjinfo.head()
# Walk through the preprocessing for one subject (index 0) before it is
# wrapped into loadsbj() below.
isbj = 0
datatmp = pd.read_csv(Subjinfo['dynamic_static'][isbj], '\t',
                      header=None)
# change column name
column_name = ['Trial', 'Resp', 'ACC', 'RT', 'cRect1', 'cRect2', 'cRect3', 'cRect4',
               'Randseq', 'Task']
datatmp.columns = column_name
# change stimuli name: 'Randseq' is a 1-based index into the stimuli list.
stim_list = pd.read_csv('experiment_code/stimuli_list.csv', ',')
datatmp['Expression'] = np.asarray(
    stim_list['Expression'].loc[datatmp['Randseq']-1])
# change response name: 'Resp' is a 1-based button code; NaN = no response.
resp_list = np.asarray(
    ['Fear', 'Anger', 'Disgust', 'Happiness', 'Sadness', 'Surprise'])
Resp = []
for iresp in datatmp['Resp']:
    if np.isfinite(iresp):
        Resp.append(resp_list[int(iresp-1)])
    else:
        Resp.append(np.nan)
datatmp['Resp_Exp'] = np.asarray(Resp)
# change task name: 'Task' is a 1-based code into Taskname.
Taskname = np.asarray(['Dynamic', 'Shuffled', 'Static'])
datatmp['Task_nam'] = Taskname[datatmp['Task']-1]
# show dataframe (first 5 lines)
datatmp.head(5)
taskorder = datatmp.Task.unique() - 1
print('The task order is:' + str(taskorder))
# Fixed label order for the confusion matrices (alphabetical, unlike resp_list).
labellist = np.asarray(
    ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise'])
for ij, itask in enumerate(Taskname):
    datatask = datatmp[datatmp['Task_nam'] == itask]
    conf_subj = conf_mat(datatask['Expression'],
                         datatask['Resp_Exp'], labels=labellist)
    conf_df = pd.DataFrame(conf_subj, columns=labellist, index=labellist)
    conf_df['trials'] = datatask['Expression'].value_counts()
    print('\n'+itask)
    print(conf_df)
# NOTE(review): `datatask` below is whatever the LAST loop iteration left
# behind (the 'Static' task) — presumably intentional for this demo cell.
# Mean RT per (Expression, Resp_Exp) pair, then keep only correct responses
# (presented expression == response).
df_agg = datatask.groupby(['Expression', 'Resp_Exp'])['RT'].agg('mean')
df_rt = df_agg.iloc[df_agg.index.get_level_values(
    'Expression') == df_agg.index.get_level_values('Resp_Exp')]
df_rt.iloc[df_rt.index.get_level_values('Resp_Exp')==labellist]
def loadsbj(filepath, labellist, column_name, stim_list, Taskname,
            resp_labels=None):
    """Load one subject's raw response file and summarise it per task.

    Parameters
    ----------
    filepath : str
        Tab-separated, header-less raw data file for one subject.
    labellist : array-like of str
        Expression labels fixing row/column order of the confusion matrix.
    column_name : list of str
        Column names to assign to the raw table.
    stim_list : pandas.DataFrame
        Stimuli table; its 'Expression' column is indexed by 'Randseq'-1.
    Taskname : array-like of str
        Task names indexed by the raw 1-based 'Task' code minus 1.
    resp_labels : array-like of str, optional
        Response-button labels indexed by the raw 1-based 'Resp' code.
        Defaults to the module-level ``resp_list`` (previously this was an
        implicit global capture; made explicit and backward compatible).

    Returns
    -------
    taskorder : numpy.ndarray
        Zero-based task codes in the order they first occur in the file.
    conf_all : numpy.ndarray, shape (3, n_labels, n_labels + 1)
        Per-task confusion matrix with a trailing per-expression trial count;
        rows stay all-zero for tasks absent from the file.
    rt_all : numpy.ndarray, shape (3, n_labels)
        Mean RT of correct responses per task and expression (NaN when an
        expression was never correctly identified).
    """
    if resp_labels is None:
        resp_labels = resp_list  # module-level default, defined above
    datatmp = pd.read_csv(filepath, '\t',
                          header=None)
    # change column name
    datatmp.columns = column_name
    # change stimuli name ('Randseq' is a 1-based stimulus index)
    datatmp['Expression'] = np.asarray(
        stim_list['Expression'].loc[datatmp['Randseq']-1])
    # change response name (NaN 'Resp' means no response given)
    Resp = []
    for iresp in datatmp['Resp']:
        if np.isfinite(iresp):
            Resp.append(resp_labels[int(iresp-1)])
        else:
            Resp.append(np.nan)
    datatmp['Resp_Exp'] = np.asarray(Resp)
    # change task name
    datatmp['Task_nam'] = Taskname[datatmp['Task']-1]
    taskorder = datatmp.Task.unique() - 1
    conf_all = np.zeros((3, len(labellist), len(labellist)+1))
    rt_all = np.zeros((3, len(labellist)))
    for ij, itask in enumerate(Taskname):
        datatask = datatmp[datatmp['Task_nam'] == itask]
        if datatask.shape[0] > 0:
            conf_subj = conf_mat(
                datatask['Expression'], datatask['Resp_Exp'], labels=labellist)
            conf_df = pd.DataFrame(
                conf_subj, columns=labellist, index=labellist)
            conf_df['trials'] = datatask['Expression'].value_counts()
            conf_all[ij] = np.asarray(conf_df)
            # Mean RT per (Expression, Resp_Exp); keep correct responses only.
            df_agg = datatask.groupby(['Expression', 'Resp_Exp'])[
                'RT'].agg('mean')
            df_rt = df_agg.iloc[df_agg.index.get_level_values(
                'Expression') == df_agg.index.get_level_values('Resp_Exp')]
            rtval = []
            for label in labellist:
                rt_ = df_rt.iloc[df_rt.index.get_level_values(
                    'Resp_Exp') == label].values
                if rt_.size > 0:
                    rtval.append(rt_)
                else:
                    rtval.append(np.nan)
            # Explicit np.float64: the original bare `float64` only resolved
            # through the %pylab star-import.
            rt_all[ij] = np.asarray(rtval, dtype=np.float64).squeeze()
    return taskorder, np.asarray(conf_all), rt_all
# Re-declare the lookup tables so loadsbj() can be run independently of the
# exploratory cells above.
Taskname = np.asarray(['Dynamic', 'Shuffled', 'Static'])
resp_list = np.asarray(
    ['Fear', 'Anger', 'Disgust', 'Happiness', 'Sadness', 'Surprise'])
column_name = ['Trial', 'Resp', 'ACC', 'RT', 'cRect1', 'cRect2', 'cRect3', 'cRect4',
               'Randseq', 'Task']
stim_list = pd.read_csv('experiment_code/stimuli_list.csv', ',')
labellist = np.asarray(
    ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise'])
# Sanity check on a single subject first.
filepath = Subjinfo['dynamic_static'][isbj]
taskorder, conf_all, rt_all = loadsbj(
    filepath, labellist, column_name, stim_list, Taskname)
print(taskorder)
print(conf_all)
print(rt_all)
# Now load every subject listed in the participant table.
Sbjall_txt = Subjinfo['dynamic_static']
confusionall = []
taskorderall = []
rtall = []
for filepath in Sbjall_txt:
    taskorder, conf_all, rtval = loadsbj(
        filepath, labellist, column_name, stim_list, Taskname)
    confusionall.append(conf_all)
    taskorderall.append(taskorder)
    rtall.append(rtval)
# Demographic summaries of the full sample.
(Subjinfo.groupby(['group'])['age']
 .agg(['count', 'min', 'max', 'mean', 'std'])).round(3)
(Subjinfo.groupby(['group', 'gender'])['age']
 .agg(['count', 'min', 'max', 'mean', 'std'])).round(3)
# First 4 columns are the subject metadata; NOTE(review): assigning into an
# iloc slice may trigger pandas' SettingWithCopy warning.
subject_info = Subjinfo.iloc[:, :4]
subject_info['taskorder'] = pd.Series(taskorderall)
confusionall = np.asarray(confusionall)
rtall = np.asarray(rtall)
print(subject_info.head(5))
print(confusionall.shape)
print(subject_info.shape)
# Flag subjects for exclusion: any expression never presented in their first
# task, or any task with zero recorded trials.
nsbj = subject_info.shape[0]
subject_info['exclude'] = np.zeros(nsbj)
for isbj, taskorder, confmat in zip(range(nsbj), subject_info['taskorder'], confusionall):
    # Column sums of the first-task confusion matrix (responses per expression).
    emptyresp = np.where(np.sum(confmat[taskorder[0]], axis=0)[:6] == 0)
    if emptyresp[0].size != 0:
        print('Sbj ' + str(isbj) + ' missing expression ' +
              str(labellist[emptyresp[0]]))
        subject_info.loc[isbj, ('exclude')] = 1
    # Column 6 holds the trial counts; a task with all-zero counts is missing.
    emptytask = np.where(confmat[:, :, 6].sum(1) == 0)
    if emptytask[0].size != 0:
        print('Sbj ' + str(isbj) + ' missing task ' +
              str(Taskname[emptytask[0]]))
        subject_info.loc[isbj, ('exclude')] = 1
# print(subject_info.loc[subject_info['exclude']==1])
# Keep only non-excluded subjects for all downstream analyses.
include = subject_info['exclude'] == 0
Info_tbl = pd.DataFrame.copy(subject_info.loc[include]).reset_index()
Conf_mat = confusionall[include, :, :, :]
RT_mat = rtall[include, :]
(Info_tbl.groupby(['group', 'gender'])['age']
 .agg(['count', 'min', 'max', 'mean', 'std'])).round(3)
(Info_tbl.groupby(['group'])['age']
 .agg(['count', 'min', 'max', 'mean', 'std'])).round(3)
# Label sets for the modelling section; 'Null' is the no-response category.
exp_list = np.asarray(
    ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise'])
resp_list = np.asarray(
    ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Null'])
Tasksel = ['Dynamic', 'Static', 'Shuffled']
grplist = ['Deaf', 'Hearing']
Ng = len(grplist)
Ntask = len(Tasksel)
Nresp = len(resp_list)
Nexp = len(exp_list)
# One heatmap figure per group: response proportions (incl. a 7th 'Null'
# column for missed responses) averaged over subjects.
for igroup, tblage in Info_tbl.groupby('group'):
    Conftmp = Conf_mat[tblage.index, :, :, :6]
    Conftrl = Conf_mat[tblage.index, :, :, 6]
    # Residual count = trials without any of the 6 responses (no-response).
    tmp = Conftrl-np.sum(Conftmp, axis=3)
    Conftmp2 = np.concatenate((Conftmp, np.expand_dims(tmp, axis=3)), axis=3)
    trialnum = np.reshape(np.repeat(Conftrl, 7, axis=2),
                          newshape=Conftmp2.shape)
    # Counts -> proportions per presented expression.
    Confgroup = Conftmp2/trialnum
    # NOTE(review): this `sum` is np.sum only because of the %pylab
    # star-import; the Python builtin would raise on a 4-D array here.
    if sum(Confgroup > 1) > 0:
        print('Error at '+str(igroup))
    _, axes = plt.subplots(1, 3, figsize=(18, 6))
    plt.text(-19, -1, 'A) '+str(igroup) + ' Observers', fontsize=15)
    for ij, itask in enumerate(Tasksel):
        idx = np.where(Taskname == itask)[0][0]
        mattmp = Confgroup[:, idx, :, :]
        # Average over subjects with finite data for this task.
        meanmat = np.mean(mattmp[np.isfinite(mattmp[:, 0, 0]), :, :], axis=0)
        conf_mean = pd.DataFrame(meanmat, columns=resp_list, index=exp_list)
        sns.heatmap(conf_mean, annot=True, vmin=0, vmax=.95, cbar=False, square=True,
                    linewidths=.1, cmap='viridis', ax=axes[ij])
        axes[ij].set_title(itask)
    axes[0].set_xlabel('Response')
    axes[0].set_ylabel('Presented Expression')
    plt.savefig('figs/ConfMat'+str(igroup)+'.svg', format='svg', dpi=300)
# Per-subject accuracy proportions over ALL subjects, treating no-response
# trials as incorrect (denominator = total trials, column 6).
Conftmp = Conf_mat[:, :, :, :6]
# NaN response as ignore
# Conftrl = np.sum(Conftmp,axis=3)
# trialnum = np.reshape(np.repeat(Conftrl,6,axis=2),newshape=Conftmp.shape)
# NaN response as incorrect
trialnum = np.reshape(
    np.repeat(Conf_mat[:, :, :, 6], 6, axis=2), newshape=Conftmp.shape)
Confmean = Conftmp/trialnum
# Build one wide table per task: Info_tbl plus, for each expression, the
# diagonal accuracy (correct-identification proportion) and the mean RT.
itask = 'Dynamic'
print('Preparing '+str(itask)+' condition')
idx = np.where(Taskname == itask)[0][0]
mattmp = Confmean[:, idx, :, :]
rtmaptmp = RT_mat[:, idx, :]
datatbl1 = pd.DataFrame.copy(Info_tbl)
for ic, emolabel in enumerate(exp_list):
    # Diagonal entry = presented expression identified as itself.
    datatbl1[emolabel] = mattmp[:, ic, ic]
    datatbl1[emolabel+'RT'] = rtmaptmp[:, ic]
itask = 'Static'
print('Preparing '+str(itask)+' condition')
idx = np.where(Taskname == itask)[0][0]
mattmp = Confmean[:, idx, :, :]
rtmaptmp = RT_mat[:, idx, :]
datatbl2 = pd.DataFrame.copy(Info_tbl)
for ic, emolabel in enumerate(exp_list):
    datatbl2[emolabel] = mattmp[:, ic, ic]
    datatbl2[emolabel+'RT'] = rtmaptmp[:, ic]
itask = 'Shuffled'
print('Preparing '+str(itask)+' condition')
idx = np.where(Taskname == itask)[0][0]
mattmp = Confmean[:, idx, :, :]
rtmaptmp = RT_mat[:, idx, :]
datatbl3 = pd.DataFrame.copy(Info_tbl)
for ic, emolabel in enumerate(exp_list):
    datatbl3[emolabel] = mattmp[:, ic, ic]
    datatbl3[emolabel+'RT'] = rtmaptmp[:, ic]
plt.rcParams.update({'font.size': 15})
# Stack the three per-task tables into one long table for seaborn.
datatbl1['Task'] = 'Dynamic'
datatbl2['Task'] = 'Static'
datatbl3['Task'] = 'Shuffled'
datatbl_123 = pd.concat([datatbl1, datatbl2, datatbl3])
# Accuracy per expression, grouped by observer group, one panel per expression.
_, axes = plt.subplots(2, 3, figsize=(18, 12), sharey=True)
ax = axes.flatten()
for ic, emolabel in enumerate(exp_list):
    sns.pointplot(x='group', y=emolabel, hue='Task', dodge=.3, linestyles='--',
                  data=datatbl_123, estimator=np.mean, ci=95, n_boot=1000, ax=ax[ic])
    # Only the leftmost panel of each row gets a y label / tick labels.
    if (ic == 0) | (ic == 3):
        ax[ic].set_ylabel('Accurate Identification (%)')
        ax[ic].set_yticks([.2, .4, .6, .8, 1.])
        ax[ic].set_yticklabels(['20', '40', '60', '80', '100'])
    else:
        ax[ic].set_ylabel('')
    if ic > 0:
        ax[ic].get_legend().remove()
    # Lower bound 1/6 = chance level for six alternatives.
    ax[ic].set_ylim(1/6, 1)
    ax[ic].set_title(emolabel)
plt.tight_layout()
# plt.savefig('./figs/Average_by_group.svg', format='svg', dpi=300)
# Same data with Task on the x axis instead of group.
_, axes = plt.subplots(2, 3, figsize=(18, 12), sharey=True)
ax = axes.flatten()
for ic, emolabel in enumerate(exp_list):
    sns.pointplot(x='Task', y=emolabel, hue='group', dodge=.15, linestyles='--',
                  data=datatbl_123, estimator=np.mean, ci=95, n_boot=1000, ax=ax[ic])
    if (ic == 0) | (ic == 3):
        ax[ic].set_ylabel('Accurate Identification (%)')
        ax[ic].set_yticks([.2, .4, .6, .8, 1.])
        ax[ic].set_yticklabels(['20', '40', '60', '80', '100'])
    else:
        ax[ic].set_ylabel('')
    if ic > 0:
        ax[ic].get_legend().remove()
    ax[ic].set_ylim(1/6, 1)
    ax[ic].set_title(emolabel)
plt.tight_layout()
plt.savefig('figs/ACC_identification.svg',
            format='svg', dpi=300)
# Mean accuracy table per task and group.
datatbl_123.groupby(['Task', 'group'])[exp_list].agg('mean').round(3)
# Reuse seaborn's internal bootstrap routine for CI bounds on the nan-mean.
bootstrap = sns.algorithms.bootstrap


def _bt_percentile(x, q):
    """Bootstrap (n_boot=1000) the nan-mean of ``x`` and return its q-th percentile."""
    boots = bootstrap(x, func=np.nanmean, n_boot=1000)
    return np.percentile(boots, q=[q])[0]


def btCI_025(x):
    """Lower bound (2.5th percentile) of a 95% bootstrap CI on the nan-mean.

    Kept as a named function so pandas ``.agg`` labels the column 'btCI_025'.
    """
    return _bt_percentile(x, 2.5)


def btCI_975(x):
    """Upper bound (97.5th percentile) of a 95% bootstrap CI on the nan-mean."""
    return _bt_percentile(x, 97.5)
# Accuracy summary (mean, sd, 95% bootstrap CI) per task and group.
for expname in exp_list:
    print(expname)
    display(datatbl_123.groupby(['Task', 'group'])[expname]
            .agg(['mean', 'std', btCI_025, btCI_975])
            .round(3))
# Reaction-time point plots, one panel per expression.
_, axes = plt.subplots(2, 3, figsize=(18, 12), sharey=True)
ax = axes.flatten()
for ic, emolabel in enumerate(exp_list):
    # np.nanmean because RT is NaN when an expression was never correct.
    sns.pointplot(x='Task', y=emolabel+'RT', hue='group', dodge=.15, linestyles='--',
                  data=datatbl_123, estimator=np.nanmean, ci=95, n_boot=1000, ax=ax[ic])
    if (ic == 0) | (ic == 3):
        ax[ic].set_ylabel('Reaction Time (s)')
    else:
        ax[ic].set_ylabel('')
    if ic > 0:
        ax[ic].get_legend().remove()
    ax[ic].set_title(emolabel)
plt.tight_layout()
# RT summary tables, same layout as the accuracy tables above.
for expname in exp_list:
    print(expname+' RT')
    display(datatbl_123.groupby(['Task', 'group'])[expname+'RT']
            .agg(['mean', 'std', btCI_025, btCI_975])
            .round(3))
def softmax(x):
    """Softmax along the last axis, max-shifted for numerical stability."""
    stabilized = x - np.max(x, axis=-1, keepdims=True)
    exp_scores = np.exp(stabilized)
    return exp_scores / np.sum(exp_scores, axis=-1, keepdims=True)
# Quick sanity check: our numpy softmax matches theano's row-wise softmax.
# NOTE: `ntrial` and `beta` defined here are re-bound below for the models.
ntrial, npredi, nitem = 10, 3, 4
X = np.random.randn(ntrial, npredi)
beta = np.random.randn(npredi, nitem)
yhat = np.matmul(X, beta)
print(softmax(yhat))
print('\n')
print(tt.nnet.softmax(yhat).eval())
Conf_mat[1]
Conf_mat.reshape(-1, 7)[18:36]
# Flatten (subject, task) into rows of 7: six response counts + trial total.
dataval = Conf_mat.reshape(-1, 7)
# Per-row trial totals, used as Multinomial `n` in the models below.
ntrial = dataval[:,-1]
# Replace the trial-total column with the no-response count so each row sums
# to its trial total (7 mutually exclusive outcome categories).
dataval = np.hstack((dataval[:,:6], (dataval[:,-1]-dataval[:,:6].sum(axis=1))[:,None]))
# individual annotator confusion matrices - dominant diagonal
# Dirichlet prior: ones everywhere, +1 on the diagonal (correct responses).
K=6
beta = np.ones( (K,K+1) )
beta[np.diag_indices(K)]+=1
beta
# One prior block per (subject, task) row of dataval.
prior = np.tile(beta, (Conf_mat.shape[0]*Conf_mat.shape[1], 1))
# Zero-based group index per subject (1/2 in the table -> 0/1 here).
groupid = Info_tbl['group_id'].values-1
# Empirical per-group mean proportions, used as reference lines in the plots.
Conf_mat2 = Conf_mat[:, :, :, :6]/(Conf_mat[:, :, :, -1][:, :, :, None])
grpmean = []
for ig in range(2):
    grpmean.append(Conf_mat2[groupid == ig, :, :, :].mean(axis=0))
grpmean = np.asarray(grpmean)
grpmean.shape
def CONVERGENCE_TITLE(bfmi, max_gr, min_effn):
    """Format MCMC convergence diagnostics into a short display string.

    `max_gr` and `min_effn` are expected to expose a `.data` attribute
    (e.g. xarray scalars from the diagnostics) -- TODO confirm upstream.
    """
    template = 'BFMI = {} \nmax(R_hat) = {} min(Eff_n) = {}'
    return template.format(bfmi, max_gr.data, min_effn.data)
def get_diags(trace):
    """Return (BFMI, worst R-hat, worst effective sample size) for a trace.

    Scans every variable's diagnostic array and keeps the most pessimistic
    value: the largest R-hat and the smallest effective n.
    """
    bfmi = pm.bfmi(trace)
    worst_rhat = [np.max(stat) for stat in pm.rhat(trace).values()]
    worst_essn = [np.min(stat) for stat in pm.ess(trace).values()]
    return bfmi, max(worst_rhat), min(worst_essn)
def plotgroup_p(postp, itask):
    """Plot a 6x6 grid of posterior densities for one task.

    Rows = presented expression, columns = response; each panel overlays the
    two observer groups. Diagonal panels also mark the empirical group mean
    (from the module-level ``grpmean``) with a vertical line.
    ``postp`` is indexed as (draw, group, task, expression, response).
    """
    print('Displaying task ' + Taskname[itask])
    group_colors = ['C0', 'C1']
    _, axgrid = plt.subplots(6, 6, figsize=(18, 9), sharex=True)
    for col, replabel in enumerate(resp_list[:6]):
        for row, explabel in enumerate(exp_list):
            panel = axgrid[row, col]
            if row == 0:
                panel.set_title(replabel)
            if col == 0:
                panel.set_ylabel(explabel)
            for ig, grpname in enumerate(grplist):
                draws = postp[:, ig, itask, row, col]
                sns.distplot(draws, kde=True, ax=panel, label=grpname)
                if row == col:
                    # Reference line: empirical mean proportion for this group.
                    panel.axvline(grpmean[ig, itask, row, col],
                                  color=group_colors[ig])
    plt.legend()
    plt.tight_layout()
def post_dync_adv(postp):
    """Plot posterior densities of the dynamic advantage (Dynamic - Static).

    Same 6x6 layout as plotgroup_p (rows = expression, cols = response), with
    a vertical line at zero marking "no advantage". Task indices 0 and 2 are
    Dynamic and Static respectively in ``postp``.
    """
    print('Displaying dynamic advantage [Dynamic minus Static]')
    _, axgrid = plt.subplots(6, 6, figsize=(18, 9), sharex=True)
    for col, replabel in enumerate(resp_list[:6]):
        for row, explabel in enumerate(exp_list):
            panel = axgrid[row, col]
            if row == 0:
                panel.set_title(replabel)
            if col == 0:
                panel.set_ylabel(explabel)
            for ig, grpname in enumerate(grplist):
                delta = postp[:, ig, 0, row, col] - postp[:, ig, 2, row, col]
                sns.distplot(delta, kde=True, ax=panel, label=grpname)
                panel.axvline(0, color='k')
    plt.legend()
    plt.tight_layout()
plt.rcParams.update({'font.size': 10})
# Model 0: independent Dirichlet-Multinomial per (subject, task) row, with a
# diagonal-weighted Dirichlet prior. No pooling across subjects or groups.
with pm.Model() as model0:
    theta = pm.Dirichlet('theta', a=prior, shape=prior.shape)
    # ntrial = per-row trial totals; dataval rows sum to ntrial by construction.
    obs = pm.Multinomial('obs', n=ntrial, p=theta, observed=dataval)
    trace0 = pm.sample(1000, tune=2000)
_, ax = plt.subplots(1, 1, figsize=(7, 5))
bfmi, max_gr, min_effn = get_diags(trace0)
pm.energyplot(trace0, ax=ax);
print(CONVERGENCE_TITLE(bfmi, max_gr, min_effn))
trace0['theta'].shape
# NOTE(review): 2000 assumes 1000 draws x 2 chains — verify against the
# actual number of chains sampled.
posterior_p = trace0['theta'].reshape((2000,)+Conf_mat.shape)
posterior_p.shape
# Average the per-subject posteriors within each group.
postp = []
for ig in range(len(np.unique(groupid))):
    postp.append(posterior_p[:, groupid == ig, :, :, :].mean(axis=1))
# -> (draw, group, task, expression, response)
postp = np.swapaxes(np.asarray(postp), 0, 1)
postp.shape
plotgroup_p(postp, 0)
plotgroup_p(postp, 2)
post_dync_adv(postp)
# Group-level prior template: one (K, K+1) block per (group, task).
prior2 = np.tile(beta[None,:,:], (2, 3, 1, 1))
# Model 1: hierarchical, HalfNormal group-by-task effect plus a HalfNormal
# per-row subject effect; probabilities normalised inside Multinomial.
with pm.Model() as model1:
    sbj_random = pm.HalfNormal('sbjsd', 1., shape=prior.shape)
    grp_task = pm.HalfNormal('grp_task', 1.,
                             shape=prior2.shape, testval=prior2)
    # Broadcast each subject's group/task block down to row shape.
    theta = tt.reshape(grp_task[groupid, :, :, :], prior.shape) + sbj_random
    # normalization inside Multinomial with
    # p = p / tt.sum(p, axis=-1, keepdims=True)
    obs = pm.Multinomial('obs', n=ntrial, p=theta, observed=dataval)
    trace1 = pm.sample(1000, tune=1000)
_, ax = plt.subplots(1, 1, figsize=(7, 5))
bfmi, max_gr, min_effn = get_diags(trace1)
pm.energyplot(trace1, ax=ax);
print(CONVERGENCE_TITLE(bfmi, max_gr, min_effn))
# Normalise the group-level draws to probabilities for display.
posterior_p = trace1['grp_task']
postp1 = posterior_p / np.sum(posterior_p, axis=-1, keepdims=True)
plotgroup_p(postp1, 0)
post_dync_adv(postp1)
# Model 1_: same structure as model 1 but with Normal effects on the logit
# scale mapped through a softmax link instead of HalfNormal + normalisation.
with pm.Model() as model1_:
    sbj_random = pm.Normal('sbjsd', 0., 1., shape=prior.shape)
    grp_task = pm.Normal('grp_task', 0., 1.,
                         shape=prior2.shape, testval=prior2)
    alpha = tt.reshape(grp_task[groupid, :, :, :], prior.shape) + sbj_random
    theta = tt.nnet.softmax(alpha)
    obs = pm.Multinomial('obs', n=ntrial, p=theta, observed=dataval)
    trace1_ = pm.sample(1000, tune=1000)
_, ax = plt.subplots(1, 1, figsize=(7, 5))
bfmi, max_gr, min_effn = get_diags(trace1_)
pm.energyplot(trace1_, ax=ax);
print(CONVERGENCE_TITLE(bfmi, max_gr, min_effn))
# Softmax the group-level draws to get posterior probabilities.
postp1_ = softmax(trace1_['grp_task'])
plotgroup_p(postp1_, 0)
post_dync_adv(postp1_)
Nsbj = Conf_mat.shape[0]
# Subject index repeated per task so sbj_random can be gathered row-wise.
sbjidx = np.asarray([np.arange(Nsbj)]*Ntask).T
# Model 2 (full mixed-effects): group-by-task fixed effects plus per-subject
# random effects centred on a per-subject mean, softmax link.
with pm.Model() as model2:
    """The sbject mu here helps convergent"""
    # sbj_interp = 0.
    # Per-subject intercept; centring the random effects on it aids sampling.
    sbj_interp = pm.Normal('sbjmu', 0., 1., shape=(Nsbj, 1, 1))
    sbj_random = pm.Normal('sbjsd', sbj_interp, 1., shape=(Nsbj, Nexp, Nresp))
    grp_task = pm.Normal('grp_task', 0., 1., shape=(Ng, Ntask, Nexp, Nresp))
    alpha = tt.reshape(grp_task[groupid] + sbj_random[sbjidx],
                       prior.shape)
    theta = tt.nnet.softmax(alpha)
    obs = pm.Multinomial('obs', n=ntrial, p=theta, observed=dataval)
with model2:
    # Deeper trees allowed because this posterior is harder to traverse.
    trace2 = pm.sample(1000, tune=2000, nuts_kwargs=dict(max_treedepth=15))
# import pickle
# tracedump = dict(trace2=trace2)
# with open('trace_DSS_fullmodel.pickle', 'wb') as f:
#     pickle.dump(tracedump, f)
# import pickle
# with open('trace_DSS_fullmodel.pickle', 'rb') as f:
#     tracedump = pickle.load(f)
# trace2 = tracedump['trace2']
# Inspect NUTS tree depth per chain to check the max_treedepth setting.
treedp = trace2.get_sampler_stats('depth', combine=False)
_, ax1 = plt.subplots(1, 1, figsize=(15, 3))
for treedp_ in treedp:
    ax1.plot(treedp_)
_, ax = plt.subplots(1, 1, figsize=(7, 5))
bfmi, max_gr, min_effn = get_diags(trace2)
pm.energyplot(trace2, ax=ax);
print(CONVERGENCE_TITLE(bfmi, max_gr, min_effn))
plt.rc('text', usetex=False)
pm.traceplot(trace2, varnames=['sbjmu', 'grp_task']);
# Compare all four models by information criterion (default WAIC, then LOO).
dataset_dict = {
    "model0": trace0,
    "model1": trace1,
    "model1_": trace1_,
    "model2": trace2
}
comp_df = pm.compare(dataset_dict)
comp_df
comp_df_ = pm.compare(dataset_dict, ic='loo')
comp_df_
$i$ for each task (dynamic, static, shuffled), $j$ for each expression, $k$ for each participant, and $g$ for each group.
$$ \begin{align*} \text{Observed response} \\ [y_{k,i,j}^1,...,y_{k,i,j}^7] & \sim \text{Multinomial}(n, p_{k,i,j})\\ p_{k,i,j} & = \text{SoftMax}(\mu_{k,i,j})\\ \text{Mixed-effect model} \\ \mu_{k,i,j} & = X^{kg}\beta_{g,i,j} + b_{k,i,j}\\ \text{Group-level coefficients} \\ \beta_{g,i,j} & \sim \text{Normal}(0, 1)\\ \text{Subject-level coefficients} \\ b_{k,i,j} & \sim \text{Normal}(\eta_k, 1)\\ \eta_k & \sim \text{Normal}(0, 1)\\ \end{align*} $$

postp2 = softmax(trace2['grp_task'])
def df_group(postp2, itask=0, ie=0):
    """Summarise posterior group probabilities for one task and expression.

    For every response category, builds summary rows (mean, sd, MC error,
    95% HPD interval) for each observer group plus a 'Diff' row for the
    group difference (group 0 minus group 1).

    Parameters
    ----------
    postp2 : np.ndarray
        Posterior draws indexed as (draw, group, task, expression, response).
    itask : int
        Task index into ``Taskname``.
    ie : int
        Expression index into ``exp_list``.

    Returns
    -------
    pandas.DataFrame indexed by (Express, Respond, Group).
    """
    # BUGFIX: the hpd lambda previously closed over the loop variable
    # `flat_vals` instead of using its argument `x`; it only worked because
    # it was always called immediately after `flat_vals` was rebound.
    funcs = [lambda x: pd.Series(np.mean(x, 0), name='mean'),
             lambda x: pd.Series(np.std(x, 0), name='sd'),
             lambda x: pd.Series(pm.stats.mcse(x.T), name='mc_error'),
             lambda x: pd.DataFrame(pm.stats.hpd(x, .05),
                                    columns=['hpd_2.5', 'hpd_97.5'])]
    expname = exp_list[ie]
    print('Displaying ' + expname + ' condition in ' +
          Taskname[itask] + ' task:')
    var_dfs = []
    for irsp, rspname in enumerate(resp_list):
        # One summary row per group.
        for ig, grpname in enumerate(grplist):
            vals = postp2[:, ig, itask, ie, irsp]
            flat_vals = vals.reshape(vals.shape[0], -1)
            var_df = pd.concat([f(flat_vals) for f in funcs], axis=1)
            var_df['Respond'] = rspname
            var_df['Express'] = expname
            var_df['Group'] = grpname
            var_dfs.append(var_df)
        # Group-difference row (group 0 minus group 1).
        vals = postp2[:, 0, itask, ie, irsp] - postp2[:, 1, itask, ie, irsp]
        flat_vals = vals.reshape(vals.shape[0], -1)
        var_df = pd.concat([f(flat_vals) for f in funcs], axis=1)
        var_df['Respond'] = rspname
        var_df['Express'] = expname
        var_df['Group'] = 'Diff'
        var_dfs.append(var_df)
    dforg = pd.concat(var_dfs, axis=0)
    return dforg.set_index(['Express', 'Respond', 'Group'])
# postp2 = softmax of the model-2 group-level draws (computed just above).
# Posterior densities for the Dynamic task, then per-expression tables.
plotgroup_p(postp2, 0)
plt.savefig('figs/Posterior_Dynamic_condition.svg',
            format='svg', dpi=300)
df_group(postp2, itask=0, ie=0)
df_group(postp2, itask=0, ie=1)
Display the expression with the largest group differences (ie=5, Surprise):
df_group(postp2, itask=0, ie=5)
# Posterior densities for the Static (task index 2) and Shuffled (index 1)
# tasks, then the Dynamic-minus-Static advantage.
plotgroup_p(postp2, 2)
plt.savefig('figs/Posterior_Static_condition.svg',
            format='svg', dpi=300)
plotgroup_p(postp2, 1)
plt.savefig('figs/Posterior_Shuffled_condition.svg',
            format='svg', dpi=300)
post_dync_adv(postp2)
plt.savefig('figs/Posterior_Differences_Dynamic-Static.svg',
            format='svg', dpi=300)
def df_dync_adv(postp2, ie=0):
    """Summarise the posterior dynamic advantage (Dynamic - Static).

    For one expression ``ie``, builds summary rows (mean, sd, MC error, 95%
    HPD interval) of the Dynamic-minus-Static difference for each group plus
    a 'Diff' row for the between-group difference of that advantage.

    Parameters
    ----------
    postp2 : np.ndarray
        Posterior draws indexed as (draw, group, task, expression, response);
        task indices 0 and 2 are Dynamic and Static.
    ie : int
        Expression index into ``exp_list``.

    Returns
    -------
    pandas.DataFrame indexed by (Express, Respond, Group).
    """
    # BUGFIX: as in df_group, the hpd lambda previously ignored its argument
    # `x` and closed over the loop variable `flat_vals`.
    funcs = [lambda x: pd.Series(np.mean(x, 0), name='mean'),
             lambda x: pd.Series(np.std(x, 0), name='sd'),
             lambda x: pd.Series(pm.stats.mcse(x.T), name='mc_error'),
             lambda x: pd.DataFrame(pm.stats.hpd(x, .05),
                                    columns=['hpd_2.5', 'hpd_97.5'])]
    expname = exp_list[ie]
    print('Displaying Dynamic-Static in ' + expname + ' condition:')
    var_dfs = []
    # Per-draw dynamic advantage: task 0 (Dynamic) minus task 2 (Static).
    postp2_ = np.squeeze(postp2[:, :, 0, :, :] - postp2[:, :, 2, :, :])
    for irsp, rspname in enumerate(resp_list):
        for ig, grpname in enumerate(grplist):
            vals = postp2_[:, ig, ie, irsp]
            flat_vals = vals.reshape(vals.shape[0], -1)
            var_df = pd.concat([f(flat_vals) for f in funcs], axis=1)
            var_df['Respond'] = rspname
            var_df['Express'] = expname
            var_df['Group'] = grpname
            var_dfs.append(var_df)
        # Between-group difference of the advantage (group 0 minus group 1).
        vals = postp2_[:, 0, ie, irsp] - postp2_[:, 1, ie, irsp]
        flat_vals = vals.reshape(vals.shape[0], -1)
        var_df = pd.concat([f(flat_vals) for f in funcs], axis=1)
        var_df['Respond'] = rspname
        var_df['Express'] = expname
        var_df['Group'] = 'Diff'
        var_dfs.append(var_df)
    dforg = pd.concat(var_dfs, axis=0)
    return dforg.set_index(['Express', 'Respond', 'Group'])
df_dync_adv(postp2, ie=5)